From 53b617c7776f343831fba470fbede61eecad2395 Mon Sep 17 00:00:00 2001 From: "sos22@douglas.cl.cam.ac.uk" Date: Fri, 27 Jan 2006 21:38:55 +0100 Subject: [PATCH] Bug fix: we need to eagerly synchronise the HL2, since Xen relies on it (via the guest linear table), and faults to that don't get sent to the shadow mode infrastructure, and so we can't do it lazily. Signed-off-by: Steven Smith, sos22@cam.ac.uk --- xen/arch/x86/shadow32.c | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/xen/arch/x86/shadow32.c b/xen/arch/x86/shadow32.c index 964e26d7c7..352e93b270 100644 --- a/xen/arch/x86/shadow32.c +++ b/xen/arch/x86/shadow32.c @@ -1456,7 +1456,9 @@ shadow_hl2_table(struct domain *d, unsigned long gpfn, unsigned long gmfn, { unsigned long hl2mfn; l1_pgentry_t *hl2; + l2_pgentry_t *gpgd; int limit; + int x; ASSERT(PGT_base_page_table == PGT_l2_page_table); @@ -1495,6 +1497,11 @@ shadow_hl2_table(struct domain *d, unsigned long gpfn, unsigned long gmfn, l1e_from_pfn(hl2mfn, __PAGE_HYPERVISOR); } + gpgd = map_domain_page(gmfn); + for (x = 0; x < DOMAIN_ENTRIES_PER_L2_PAGETABLE; x++) + validate_hl2e_change(d, gpgd[x], &hl2[x]); + unmap_domain_page(gpgd); + unmap_domain_page(hl2); return hl2mfn; @@ -2797,8 +2804,9 @@ void shadow_l2_normal_pt_update( unsigned long pa, l2_pgentry_t gpde, struct domain_mmap_cache *cache) { - unsigned long sl2mfn; + unsigned long sl2mfn, hl2mfn; l2_pgentry_t *spl2e; + l1_pgentry_t *hl2e; shadow_lock(d); @@ -2812,6 +2820,15 @@ void shadow_l2_normal_pt_update( &spl2e[(pa & ~PAGE_MASK) / sizeof(l2_pgentry_t)]); unmap_domain_page_with_cache(spl2e, cache); } + hl2mfn = __shadow_status(current->domain, pa >> PAGE_SHIFT, + PGT_hl2_shadow); + if ( hl2mfn ) + { + hl2e = map_domain_page(hl2mfn); + validate_hl2e_change(d, gpde, + &hl2e[(pa & ~PAGE_MASK) / sizeof(l1_pgentry_t)]); + unmap_domain_page(hl2e); + } shadow_unlock(d); } -- 2.30.2